How to install Kubernetes with binary files on CentOS 8
env:
#append to hosts
192.168.0.202 master1
192.168.0.203 master2
192.168.0.204 master3
192.168.0.205 node1
192.168.0.206 node2
192.168.0.207 node3 #haproxy
cat hosts [nodes] example ansible_ssh_private_key_file=/secure/cert.pem ansible_ssh_user=user ansible -i hosts nodes -b -m copy -a "src=/env.sh dest=/env.sh" #-m script ansible -i hosts nodes -b -m shell -a "sh /env.sh"
Install HAProxy and other software (ignoring keepalived)
on node3
yum install haproxy -y cat > /etc/haproxy/haproxy.cfg <<EOF global daemon maxconn 1024 defaults mode http timeout connect 5000ms timeout client 50000ms timeout server 50000ms frontend kubernetes mode tcp bind *:6443 default_backend kubernetes-masters backend kubernetes-masters mode tcp balance roundrobin server master1 192.168.0.202:6443 server master2 192.168.0.203:6443 server master3 192.168.0.204:6443 EOF cat >> /etc/sysctl.conf <<EOF net.ipv4.ip_forward=1 net.ipv4.ip_nonlocal_bind=1 #for ka EOFon masters and nodes(install supervisor)
yum install python3 python3-devel python3-pip -y pip3 install supervisor #install pysocks if using socks proxy echo_supervisord_conf > /etc/supervisord.conf # don't forget to change sock file path on nodes(about ipvs and install docker-ce)yum install ipset ipvsadm -y cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4 yum install yum-utils -y yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm yum install docker-ce -y mkdir -p /etc/docker cat > daemon.json <<EOF { "exec-opts": ["native.cgroupdriver=systemd"], "log-driver": "json-file", "log-opts": { "max-size": "100m" }, "storage-driver": "overlay2", "storage-opts": [ "overlay2.override_kernel_check=true" ] } EOF systemctl start docker && systemctl enable dockerdownload binary files and extract
curl -LO https://github.com/etcd-io/etcd/releases/download/v3.3.25/etcd-v3.3.25-linux-amd64.tar.gz curl -LO https://dl.k8s.io/v1.19.0/kubernetes-server-linux-arm.tar.gz curl -LO https://github.com/coreos/flannel/releases/download/v0.12.0/flannel-v0.12.0-linux-amd64.tar.gz for binary in *.tar.gz;do tar zxvf $binary;done cp etcd etcdctl kubectl kube-apiserver kube-controller-manager kube-scheduler /usr/bin/ scp etcd etcdctl kubectl kube-apiserver kube-controller-manager kube-scheduler root@otherMasters:/usr/bin/ scp flanneld mk-docker-opts.sh kubectl kubelet kube-proxy root@nodes:/usr/bin/prepare certs and credentials
on master1
mkdir /certs && cd /certs curl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/bin/cfssl curl https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/bin/cfssljson curl https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/bin/cfssl-certinfo chmod +x /usr/bin/cfssl* cat > ca-config.json <<EOF { "signing": { "default": { "expiry": "8760h" }, "profiles": { "kubernetes": { "usages": ["signing", "key encipherment", "server auth", "client auth"], "expiry": "8760h" } } } } EOF cat > ca-csr.json <<EOF { "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "HongKong", "ST": "HongKong", "O": "Kubernetes", "OU": "System" } ] } EOF cfssl gencert -initca ca-csr.json | cfssljson -bare ca cat > server-csr.json <<EOF { "CN": "kubernetes", "hosts": [ "127.0.0.1", "192.168.0.202", "192.168.0.203", "192.168.0.204", "192.168.0.207", "172.16.16.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "HongKong", "ST": "HongKong", "O": "Kubernetes", "OU": "System" } ] } EOF cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server cat > admin-csr.json <<EOF { "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "HongKong", "ST": "HongKong", "O": "System:Masters", "OU": "System" } ] } EOF cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin cat > kube-proxy-csr.json <<EOF { "CN": "system:kube-proxy", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "HongKong", "ST": "HongKong", "O": "Kubernetes", "OU": "System" } ] } EOF cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy cat > 
metrics-csr.json <<EOF { "CN": "aggregator", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "HongKong", "ST": "HongKong", "O": "kubernetes", "OU": "system" } ] } EOF cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes metrics-csr.json | cfssljson -bare metrics TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/[:space:]" | dd bs=32 count=1 2>/dev/null) echo $TOKEN,kubelet-bootstrap,10001,'"system:kubelet-bootstrap"' >> tokens.csvprepare kubeconfig files(append config.yaml for metrics-server, kubelet-client-current.pem and kubelet-server-current.pem)
cd /certs kubectl config set-cluster kubernetes --certificate-authority=/certs/ca.pem --embed-certs=true --server=https://192.168.0.207:6443 --kubeconfig=bootstrap.kubeconfig kubectl config set-credentials kubelet-bootstrap --token=$TOKEN --kubeconfig=bootstrap.kubeconfig kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig kubectl config use-context default --kubeconfig=bootstrap.kubeconfig kubectl config set-cluster kubernetes --certificate-authority=/certs/ca.pem --embed-certs=true --server=https://192.168.0.207:6443 --kubeconfig=kube-proxy.kubeconfig kubectl config set-credentials kube-proxy --client-certificate=/certs/kube-proxy.pem --client-key=/certs/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig kubectl config set-cluster kubernetes --certificate-authority=/certs/ca.pem --embed-certs=true --server=https://192.168.0.207:6443 --kubeconfig=kubectl.kubeconfig kubectl config set-credentials admin --client-certificate=/certs/admin.pem --client-key=/certs/admin-key.pem --embed-certs=true --kubeconfig=kubectl.kubeconfig kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kubectl.kubeconfig kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig cat > config.yaml <<EOF kind: KubeletConfiguration apiVersion: kubelet.config.k8s.io/v1beta1 authentication: x509: clientCAFile: "/certs/ca.pem" rotateCertificates: true rotateServerCertificates: true serverTLSBootstrap: true EOF scp ca.pem server.pem server-key.pem bootstrap.kubeconfig kube-proxy.kubeconfig config.yaml root@nodes:/certs/start etcd, apiserver, scheduler and controller manager
edit /etc/supervisord.conf, append these sections:
[program:etcd] command=/usr/bin/etcd --name="etcd1" --data-dir=/var/lib/etcd --listen-peer-urls="https://192.168.0.202:2380" --listen-client-urls="https://192.168.0.202:2379","http://127.0.0.1:2379" --advertise-client-urls="https://192.168.0.202:2379" --initial-advertise-peer-urls="https://192.168.0.202:2380" --initial-cluster="etcd1=https://192.168.0.202:2380,etcd2=https://192.168.0.203:2380,etcd3=https://192.168.0.204:2380" --initial-cluster-token="etcd-cluster" --initial-cluster-state="new" --cert-file=/certs/server.pem --key-file=/certs/server-key.pem --peer-cert-file=/certs/server.pem --peer-key-file=/certs/server-key.pem --trusted-ca-file=/certs/ca.pem --peer-trusted-ca-file=/certs/ca.pem directory=/var/lib/etcd priority=900 stderr_logfile=/var/lib/etcd/etcd.log [program:kube-apiserver] #command=/usr/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.0.202:2379,https://192.168.0.203:2379,https://192.168.0.204:2379 --insecure-bind-address=127.0.0.1 --bind-address=192.168.0.202 --insecure-port=8080 --secure-port=6443 --advertise-address=192.168.0.202 --allow-privileged=true --service-cluster-ip-range=172.16.0.0/16 --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/certs/token.csv --service-node-port-range=30000-60000 --tls-cert-file=/certs/server.pem --tls-private-key-file=/certs/server-key.pem --client-ca-file=/certs/ca.pem --service-account-key-file=/certs/ca-key.pem --etcd-cafile=/certs/ca.pem --etcd-certfile=/certs/server.pem --etcd-keyfile=/certs/server-key.pem command=/usr/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.0.202:2379,https://192.168.0.203:2379,https://192.168.0.204:2379 --insecure-bind-address=127.0.0.1 --bind-address=192.168.0.202 --insecure-port=8080 --secure-port=6443 --advertise-address=192.168.0.202 --allow-privileged=true 
--service-cluster-ip-range=172.16.0.0/16 --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/certs/token.csv --service-node-port-range=30000-60000 --tls-cert-file=/certs/server.pem --tls-private-key-file=/certs/server-key.pem --client-ca-file=/certs/ca.pem --service-account-key-file=/certs/ca-key.pem --etcd-cafile=/certs/ca.pem --etcd-certfile=/certs/server.pem --etcd-keyfile=/certs/server-key.pem --requestheader-client-ca-file=/certs/ca.pem --requestheader-allowed-names=aggregator --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/certs/metrics.pem --proxy-client-key-file=/certs/metrics-key.pem --enable-aggregator-routing=true --kubelet-client-certificate=/certs/server.pem --kubelet-client-key=/certs/server-key.pem directory=/var/log/kubernetes/ priority=901 stderr_logfile=/var/log/kubernetes/kube-apiserver.log [program:kube-scheduler] command=/usr/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect directory=/var/log/kubernetes/ priority=902 stderr_logfile=/var/log/kubernetes/kube-scheduler.log [program:kube-controller-manager] command=/usr/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=172.16.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/certs/ca.pem --cluster-signing-key-file=/certs/ca-key.pem --service-account-private-key-file=/certs/ca-key.pem --root-ca-file=/certs/ca.pem directory=/var/log/kubernetes/ priority=903 stderr_logfile=/var/log/kubernetes/kube-controller-manager.logsupervisord -c /etc/supervisord.confstart base software on nodes
edit /etc/supervisord.conf, append these sections:
[program:kubelet] #auto-generated kubelet.kubeconfig, we should use config.yaml #command=/usr/bin/kubelet --logtostderr=true --v=4 --address=192.168.0.205 --hostname-override=node1 --cert-dir=/certs/ --kubeconfig=/certs/kubelet.kubeconfig --bootstrap-kubeconfig=/certs/bootstrap.kubeconfig --cgroup-driver=systemd --cluster-domain=cluster.local --cluster-dns=172.16.16.16 --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 command=/usr/bin/kubelet --logtostderr=true --v=4 --address=192.168.0.205 --hostname-override=master1 --cert-dir=/certs/ --kubeconfig=/certs/kubelet.kubeconfig --bootstrap-kubeconfig=/certs/bootstrap.kubeconfig --cgroup-driver=systemd --cluster-domain=cluster.local --cluster-dns=172.16.16.16 --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 --config=/certs/config.yaml directory=/var/log/kubernetes/ priority=900 stderr_logfile=/var/log/kubernetes/kubelet.log [program:kube-proxy] command=/usr/bin/kube-proxy --logtostderr=true --v=4 --bind-address=192.168.0.205 --hostname-override=node1 --kubeconfig=/certs/kube-proxy.kubeconfig --proxy-mode=ipvs directory=/var/log/kubernetes/ priority=901 stderr_logfile=/var/log/kubernetes/kube-proxy.log [program:flanneld] command=/usr/bin/flanneld -etcd-endpoints="https://192.168.0.202:2379,https://192.168.0.203:2379,https://192.168.0.204:2379" -etcd-cafile=/certs/ca.pem -etcd-certfile=/certs/server.pem -etcd-keyfile=/certs/server-key.pem directory=/var/log/kubernetes/ priority=800 stderr_logfile=/var/log/kubernetes/flanneld.logkubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap kubectl create clusterrole system:kube-apiserver-to-kubelet --verb=* --resource=* kubectl create clusterrolebinding kube-apiserver-to-kubelet --clusterrole=system:kube-apiserver-to-kubelet --user=kubernetes # user=server-csr.json's CN mkdir -p /run/flannel/ etcdctl --cert-file 
/certs/server.pem --key-file /certs/server-key.pem --ca-file /certs/ca.pem set /coreos.com/network/config '{ "Network": "172.16.0.0/16", "Backend": {"Type": "vxlan"}}' etcdctl get /coreos.com/network/config supervisord -c /etc/supervisord.conf vi /usr/lib/systemd/system/docker.service EnvironmentFile=/run/flannel/subnet.env ExecStart=/usr/bin/dockerd --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} -H fd:// --containerd=/run/containerd/containerd.sock systemctl daemon-reload && systemctl restart dockerno node3
mv kubectl.kubeconfig /root/.kube/config kubectl get csr kubectl certificate approve node-csr-1lPcxRlCeuFRQ5_uzdZRqJZSMVBlonw-_YfyRoPJpW8 kubectl get noinstall coredns
curl -O https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed curl -O https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh ./deploy.sh -i 172.16.16.16 | kubectl apply -f -install metrics-server(hostPath /certs)
args: #- --cert-dir=/tmp - --tls-cert-file=/certs/server.pem - --tls-private-key-file=/certs/server-key.pem - --secure-port=4443 - --metric-resolution=30s #- --kubelet-insecure-tls - --kubelet-certificate-authority=/certs/ca.pem - --kubelet-preferred-address-types=Hostnamereferences:
https://v1-12.docs.kubernetes.io/docs/setup/scratch/
https://www.haproxy.org/download/1.8/doc/configuration.txt
https://coreos.com/flannel/docs/latest/running.html
https://kubernetes.io/docs/setup/production-environment/container-runtimes/
https://github.com/kubernetes/kubernetes/tree/master/pkg/proxy/ipvs